import os

# Show where the notebook is running from; the data files below are loaded
# via absolute paths relative to this machine.
current_directory = os.getcwd()
print(current_directory)
/Users/macbookpro/hdd/MSc/Dissertation/multilabeltextclassification
import numpy
# Record the NumPy version for reproducibility (notebook was run with 1.22.0).
numpy.version.version
'1.22.0'
# importing libraries
from tensorflow import keras
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
#from keras.preprocessing.sequence import pad_sequences
#from keras_preprocessing.sequence import pad_sequences
#from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.utils import pad_sequences
from tensorflow.keras.preprocessing import text, sequence
from tensorflow.keras import initializers, regularizers, constraints, optimizers, layers
from tensorflow.python.keras.models import Model, Input
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import GRU, Dense, Input, LSTM, Embedding, Dropout, SpatialDropout1D, Activation, SimpleRNN
from tensorflow.keras.layers import Conv1D, Bidirectional, GlobalMaxPool1D, MaxPooling1D, BatchNormalization
from tensorflow.keras.optimizers import Adam
#from tensorflow.keras.optimizers import SGD
# For custom metrics
import keras.backend as K
from keras.utils.vis_utils import plot_model
from keras.callbacks import EarlyStopping
# Data handling and visualisation libraries.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn')  # NOTE(review): the bare 'seaborn' style name is deprecated in newer matplotlib ('seaborn-v0_8')
import seaborn as sns
from IPython.display import Image
from tqdm import tqdm
# NLP utilities (tokenization, stop words) and misc stdlib helpers.
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
import os, re, csv, math, codecs
from nltk.tokenize import word_tokenize
import string
import gensim
sns.set_style("whitegrid")
# Fix the NumPy RNG so shuffles/splits are reproducible across runs.
np.random.seed(0)
2023-04-05 23:07:40.346238: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
#df1 = pd.read_csv('/Users/macbookpro/hdd/MSc/Dissertation/multilabeltextclassification/githubissuedata2M.csv')
# Load the GitHub-issue dataset (absolute, machine-specific path).
df = pd.read_csv('/Users/macbookpro/hdd/MSc/Dissertation/multilabeltextclassification/githubissuedata.csv')
# Column overview and non-null counts.
df.info()
<class 'pandas.core.frame.DataFrame'> RangeIndex: 226163 entries, 0 to 226162 Data columns (total 17 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 title 225152 non-null object 1 body 225866 non-null object 2 task 226163 non-null int64 3 bug 226163 non-null int64 4 documentation 226163 non-null int64 5 duplicate 226163 non-null int64 6 enhancement 226163 non-null int64 7 good_first_issue 226163 non-null int64 8 help_wanted 226163 non-null int64 9 invalid 226163 non-null int64 10 question 226163 non-null int64 11 wontfix 226163 non-null int64 12 gitalk 226163 non-null int64 13 priority_medium 226163 non-null int64 14 priority_high 226163 non-null int64 15 feature_request 226163 non-null int64 16 feature 226163 non-null int64 dtypes: int64(15), object(2) memory usage: 29.3+ MB
# Preview the first 10 rows.
df.head(10)
| title | body | task | bug | documentation | duplicate | enhancement | good_first_issue | help_wanted | invalid | question | wontfix | gitalk | priority_medium | priority_high | feature_request | feature | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | My Account Paid laptop 1440 resolution Updat... | Case:Distance between Registered email address... | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 1 | How to fix sleepimpl warning when ECS credenti... | Prerequisites X Ive searched for previous sim... | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 2 | Slider doesnt work on touch devices | DescriptionSlider should work dragging in tou... | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 3 | add new labels | DescriptionAdd ui and logic to permanently ad... | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 4 | No lib sub folder in Boost folder | Hi I am following thishttps://www.mlpack.org/d... | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
| 5 | Add license notice to CLI | The CLI is missing the license notice. Theres ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
| 6 | Should show Powershell or AzureCLI code necess... | There is example output from Powershell and Az... | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 7 | tidboperator could not work with kubernetes 1.23 | Bug ReportWhat version of Kubernetes are you ... | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 8 | Match Live | x Implement game logic x Calculate results ba... | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 9 | AngularBug Make current location widget more g... | If youve never submitted an issue to the SORMA... | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
# Data-quality report: count missing cells and fully-duplicated rows
# before any cleaning is applied.
missing_count = df.isna().sum().sum()
duplicate_count = df.duplicated().sum()
print(f'The dataset has {missing_count} missing values in test data.')
print(f'The dataset has {duplicate_count} duplicates in train data.')
The dataset has 1308 missing values in test data. The dataset has 46411 duplicates in train data.
# Clean the dataset in place: first drop rows with any missing value,
# then drop exact duplicate rows.
df.dropna(inplace=True)
df.drop_duplicates(inplace=True)
# Re-run the data-quality report to confirm the cleaning took effect.
print(f'The dataset has {df.isna().sum().sum()} missing values in test data.')
print(f'The dataset has {df.duplicated().sum()} duplicates in train data.')
The dataset has 0 missing values in test data. The dataset has 0 duplicates in train data.
# Splitting the dataset into train and test partitions (80/20, fixed seed
# so the split is reproducible).
from sklearn.model_selection import train_test_split
train_df, test_df = train_test_split(df, test_size=0.2, random_state=25)
# Sanity-check the test partition size.
test_df.shape
(35787, 17)
# Sanity-check the train partition size.
train_df.shape
(143145, 17)
# DATA PREPROCESSING
#Deep neural network input layers consume numeric variables for training. In this task, however, we are dealing with text, so we must first decide how to represent words before feeding them to the model.
#In our experiment we used a dense representation of the text (issue bodies) that also captures word semantics. Dense embeddings are the standard way to fit neural networks on text data, and they use less memory than sparse representations.
#Word Embedding
#Two ways to feed embeddings to neural networks:
#1. Train your own word embeddings from scratch
#2. Use pre-trained embeddings (e.g. Word2vec, Doc2Vec, GloVe, etc.)
# Placeholder input shape; NOTE(review): this is overridden to (500,) before
# either model is actually built below.
input_shape = (1000,)
input_shape[0]
1000
#Convert text to vectors using keras preprocessing library tools
# Features: the free-text issue body. Targets: the 15 binary label columns.
# The label list was previously duplicated verbatim for train and test;
# a single constant keeps the two in sync.
LABEL_COLUMNS = ["task", "bug", "documentation", "duplicate", "enhancement",
                 "good_first_issue", "help_wanted", "invalid", "question",
                 "wontfix", "gitalk", "priority_medium", "priority_high",
                 "feature_request", "feature"]
X_train = train_df["body"].values
X_test = test_df["body"].values
y_train = train_df[LABEL_COLUMNS].values
y_test = test_df[LABEL_COLUMNS].values
#For the first embedding, we used keras preprocessing (Text Preprocessing) libraries.
#This class allows vectorizing a text corpus, turning each text into either a sequence of integers
#(each integer being the index of a token in a dictionary) or into a vector where the coefficient for
#each token could be binary, based on word count, or based on tf-idf.
num_words = 20000 #Max. vocabulary size kept by the tokenizer
max_features = 200000 #Embedding input dimension; NOTE(review): larger than num_words, so most embedding rows are never trained
max_len = 500 #Max. number of tokens kept per issue body
embedding_dims = 128 #Embedding vector output dimension
num_epochs = 25 #Number of training epochs (was 5)
val_split = 0.1 #Fraction of training data held out for validation
batch_size2 = 256 #Training batch size (was 32); the batch size is the number of examples per forward/backward pass.
# In general, larger batch sizes make faster progress per epoch but don't always converge as quickly.
#Smaller batch sizes train slower but can converge faster; larger batches also need more memory.
#Issue body Tokenization
# Fit a word-index tokenizer on the TRAINING bodies only (no test leakage),
# then map each body to a fixed-length integer sequence for the embedding layer.
# (The original line read `tokenizer = tokenizer = Tokenizer(num_words)` —
# a harmless but confusing duplicated assignment, removed here.)
tokenizer = Tokenizer(num_words=num_words)
tokenizer.fit_on_texts(list(X_train))
#Convert tokenized issue body text to sequences of word indices.
X_train = tokenizer.texts_to_sequences(X_train)
X_test = tokenizer.texts_to_sequences(X_test)
# Pad/truncate every sequence to exactly max_len tokens.
X_train = pad_sequences(X_train, maxlen=max_len)
X_test = pad_sequences(X_test, maxlen=max_len)
print('X_train shape:', X_train.shape)
print('X_test shape: ', X_test.shape)
X_train shape: (143145, 500) X_test shape: (35787, 500)
#Writing functions for Precision, Recall, F1-Measure, AUC, mean etc evaluaiton metrics to evaluate the model
#Import necessary libraries
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.datasets import make_circles
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
def precision(y_true, y_pred):
    """Multi-label precision: fraction of predicted positives that are correct.

    Predictions are rounded to {0, 1}, so this treats the sigmoid outputs
    as hard labels at a 0.5 threshold. K.epsilon() guards against a
    zero-division when nothing is predicted positive.
    """
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_pos / (pred_pos + K.epsilon())
def recall(y_true, y_pred):
    """Multi-label recall: fraction of actual positives that were predicted.

    Predictions are rounded to {0, 1} (hard labels at a 0.5 threshold);
    K.epsilon() guards against a zero-division when there are no positives.
    """
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_pos / (actual_pos + K.epsilon())
# Custom metric: mean of the raw sigmoid outputs. Tracks the average predicted
# probability per batch (useful to spot a collapsed model predicting ~0 or ~1).
def mean_pred(y_true, y_pred):
    return K.mean(y_pred)
def fbeta_score(y_true, y_pred, beta=1):
    '''Calculates the F score, the weighted harmonic mean of precision and recall.

    This is useful for multi-label classification, where input samples can be
    classified as sets of labels. By only using accuracy (precision) a model
    would achieve a perfect score by simply assigning every class to every
    input. In order to avoid this, a metric should penalize incorrect class
    assignments as well (recall). The F-beta score (ranged from 0.0 to 1.0)
    computes this, as a weighted mean of the proportion of correct class
    assignments vs. the proportion of incorrect class assignments.
    With beta = 1, this is equivalent to an F-measure. With beta < 1, assigning
    correct classes becomes more important, and with beta > 1 the metric is
    instead weighted towards penalizing incorrect class assignments.
    '''
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')
    # If there are no true positives, fix the F score at 0 like sklearn.
    # NOTE(review): `K.sum(...) == 0` compares a backend tensor with a Python
    # int; this `if` only behaves as intended under eager execution — confirm
    # it is not traced into a graph.
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0.0
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta ** 2
    # Standard F-beta formula; epsilon avoids 0/0 when both p and r are zero.
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta_score
def auroc(y_true, y_pred):
    """Batch-wise area under the ROC curve, usable as a Keras metric.

    The original implementation called `tf.keras.metrics.AUC(y_true, y_pred)[1]`,
    which passes the labels/predictions to the metric *constructor* (it only
    accepts configuration arguments, not data) and then indexes the metric
    object — both invalid. It also called `K.get_session()` /
    `tf.local_variables_initializer()`, which are TF1-only APIs. The TF2
    pattern is: instantiate the metric, accumulate with `update_state`,
    and read `result()`.
    """
    auc_metric = tf.keras.metrics.AUC()
    auc_metric.update_state(y_true, y_pred)
    return auc_metric.result()
def fmeasure(y_true, y_pred):
    """F-measure: the harmonic mean of precision and recall (F-beta, beta=1)."""
    return fbeta_score(y_true, y_pred, 1)

# Aliases so callers can use any of the three common names.
fscore = f1score = fmeasure
# CNN-LSTM architecture:
#   embedding -> two Conv1D/MaxPool stages -> LSTM -> 15-way sigmoid head.
input_shape = (500,)
inputs = Input(shape=input_shape)

# Dense word embeddings feed the convolutional feature extractor.
h = Embedding(input_dim=max_features, output_dim=embedding_dims,
              input_length=input_shape[0])(inputs)
h = Conv1D(filters=64, kernel_size=3, activation='relu')(h)
h = MaxPooling1D(pool_size=2)(h)
h = Conv1D(filters=128, kernel_size=3, activation='relu')(h)
h = MaxPooling1D(pool_size=2)(h)
# Summarise the (downsampled) sequence with a recurrent layer.
h = LSTM(units=64)(h)
# One sigmoid unit per label: independent probabilities for multi-label output.
outputs = Dense(15, activation='sigmoid')(h)

# Binary cross-entropy per label is the standard multi-label loss.
CNN_LSTM_model = Model(inputs=inputs, outputs=outputs)
CNN_LSTM_model.compile(optimizer='adam', loss='binary_crossentropy',
                       metrics=['accuracy', mean_pred, fmeasure, precision, recall])
CNN_LSTM_model.summary()
Model: "model_4" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= Total params: 25,699,727 Trainable params: 25,699,727 Non-trainable params: 0 _________________________________________________________________
2023-04-05 23:36:22.171257: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_2_grad/concat/split_2/split_dim' with dtype int32
[[{{node gradients/split_2_grad/concat/split_2/split_dim}}]]
2023-04-05 23:36:22.173302: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_grad/concat/split/split_dim' with dtype int32
[[{{node gradients/split_grad/concat/split/split_dim}}]]
2023-04-05 23:36:22.174937: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_1_grad/concat/split_1/split_dim' with dtype int32
[[{{node gradients/split_1_grad/concat/split_1/split_dim}}]]
# Early stopping: monitor validation loss and halt training once it has
# stopped improving for 4 consecutive epochs.
early = EarlyStopping(monitor="val_loss", mode="min", patience=4)
CNN_LSTM_model_fit = CNN_LSTM_model.fit(
    X_train, y_train,
    batch_size=batch_size2,
    epochs=num_epochs,
    validation_split=val_split,
    callbacks=[early],
)
Epoch 1/25
2023-04-05 23:36:29.023259: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_2_grad/concat/split_2/split_dim' with dtype int32
[[{{node gradients/split_2_grad/concat/split_2/split_dim}}]]
2023-04-05 23:36:29.025409: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_grad/concat/split/split_dim' with dtype int32
[[{{node gradients/split_grad/concat/split/split_dim}}]]
2023-04-05 23:36:29.026996: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_1_grad/concat/split_1/split_dim' with dtype int32
[[{{node gradients/split_1_grad/concat/split_1/split_dim}}]]
2023-04-05 23:36:30.082297: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_2_grad/concat/split_2/split_dim' with dtype int32
[[{{node gradients/split_2_grad/concat/split_2/split_dim}}]]
2023-04-05 23:36:30.084392: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_grad/concat/split/split_dim' with dtype int32
[[{{node gradients/split_grad/concat/split/split_dim}}]]
2023-04-05 23:36:30.085930: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_1_grad/concat/split_1/split_dim' with dtype int32
[[{{node gradients/split_1_grad/concat/split_1/split_dim}}]]
504/504 [==============================] - ETA: 0s - loss: 0.1961 - accuracy: 0.5424 - mean_pred: 0.0936 - fmeasure: 0.3972 - precision: 0.6646 - recall: 0.3016
2023-04-05 23:42:40.101819: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_2_grad/concat/split_2/split_dim' with dtype int32
[[{{node gradients/split_2_grad/concat/split_2/split_dim}}]]
2023-04-05 23:42:40.103872: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_grad/concat/split/split_dim' with dtype int32
[[{{node gradients/split_grad/concat/split/split_dim}}]]
2023-04-05 23:42:40.105619: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_1_grad/concat/split_1/split_dim' with dtype int32
[[{{node gradients/split_1_grad/concat/split_1/split_dim}}]]
504/504 [==============================] - 381s 750ms/step - loss: 0.1961 - accuracy: 0.5424 - mean_pred: 0.0936 - fmeasure: 0.3972 - precision: 0.6646 - recall: 0.3016 - val_loss: 0.1699 - val_accuracy: 0.6209 - val_mean_pred: 0.0824 - val_fmeasure: 0.5306 - val_precision: 0.7303 - val_recall: 0.4169 Epoch 2/25 504/504 [==============================] - 397s 787ms/step - loss: 0.1645 - accuracy: 0.6404 - mean_pred: 0.0809 - fmeasure: 0.5633 - precision: 0.7375 - recall: 0.4566 - val_loss: 0.1637 - val_accuracy: 0.6351 - val_mean_pred: 0.0816 - val_fmeasure: 0.5564 - val_precision: 0.7349 - val_recall: 0.4480 Epoch 3/25 504/504 [==============================] - 387s 768ms/step - loss: 0.1580 - accuracy: 0.6595 - mean_pred: 0.0809 - fmeasure: 0.5861 - precision: 0.7552 - recall: 0.4795 - val_loss: 0.1611 - val_accuracy: 0.6399 - val_mean_pred: 0.0818 - val_fmeasure: 0.5681 - val_precision: 0.7317 - val_recall: 0.4645 Epoch 4/25 504/504 [==============================] - 394s 781ms/step - loss: 0.1520 - accuracy: 0.6716 - mean_pred: 0.0809 - fmeasure: 0.6006 - precision: 0.7713 - recall: 0.4924 - val_loss: 0.1593 - val_accuracy: 0.6390 - val_mean_pred: 0.0795 - val_fmeasure: 0.5711 - val_precision: 0.7394 - val_recall: 0.4655 Epoch 5/25 504/504 [==============================] - 387s 768ms/step - loss: 0.1449 - accuracy: 0.6908 - mean_pred: 0.0809 - fmeasure: 0.6269 - precision: 0.7970 - recall: 0.5172 - val_loss: 0.1590 - val_accuracy: 0.6456 - val_mean_pred: 0.0800 - val_fmeasure: 0.5748 - val_precision: 0.7537 - val_recall: 0.4650 Epoch 6/25 504/504 [==============================] - 432s 858ms/step - loss: 0.1368 - accuracy: 0.7156 - mean_pred: 0.0809 - fmeasure: 0.6561 - precision: 0.8192 - recall: 0.5478 - val_loss: 0.1598 - val_accuracy: 0.6424 - val_mean_pred: 0.0813 - val_fmeasure: 0.5844 - val_precision: 0.7382 - val_recall: 0.4839 Epoch 7/25 504/504 [==============================] - 373s 740ms/step - loss: 0.1286 - accuracy: 0.7371 - mean_pred: 0.0809 - 
fmeasure: 0.6847 - precision: 0.8381 - recall: 0.5793 - val_loss: 0.1609 - val_accuracy: 0.6448 - val_mean_pred: 0.0814 - val_fmeasure: 0.5993 - val_precision: 0.7334 - val_recall: 0.5070 Epoch 8/25 504/504 [==============================] - 373s 740ms/step - loss: 0.1200 - accuracy: 0.7591 - mean_pred: 0.0809 - fmeasure: 0.7110 - precision: 0.8573 - recall: 0.6079 - val_loss: 0.1655 - val_accuracy: 0.6400 - val_mean_pred: 0.0804 - val_fmeasure: 0.5987 - val_precision: 0.7237 - val_recall: 0.5108 Epoch 9/25 504/504 [==============================] - 372s 738ms/step - loss: 0.1114 - accuracy: 0.7811 - mean_pred: 0.0809 - fmeasure: 0.7369 - precision: 0.8745 - recall: 0.6372 - val_loss: 0.1701 - val_accuracy: 0.6425 - val_mean_pred: 0.0798 - val_fmeasure: 0.5999 - val_precision: 0.7209 - val_recall: 0.5139
# Learning curves: training vs validation accuracy per epoch.
history = CNN_LSTM_model_fit.history
plt.plot(history['accuracy'], label='Training Accuracy')
plt.plot(history['val_accuracy'], label='Validation Accuracy')
plt.title('CNN-LSTM Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(loc='upper left')
plt.show()
# Learning curves: training vs validation loss per epoch.
history = CNN_LSTM_model_fit.history
plt.plot(history['loss'], label='Training Loss')
plt.plot(history['val_loss'], label='Validation Loss')
plt.title('CNN-LSTM Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(loc='lower right')
plt.show()
from tensorflow.keras.layers import concatenate

# Hybrid architecture: parallel LSTM and GRU branches read the same
# embedded sequence; their final states are concatenated before the head.
input_shape = (500,)
inputs = Input(shape=input_shape)
embedded = Embedding(input_dim=max_features, output_dim=embedding_dims,
                     input_length=input_shape[0])(inputs)
# Two recurrent summaries of the same sequence.
lstm_out = LSTM(units=64)(embedded)
gru_out = GRU(units=64)(embedded)
merged = concatenate([lstm_out, gru_out])
# One sigmoid unit per label: independent probabilities for multi-label output.
outputs = Dense(15, activation='sigmoid')(merged)

LSTM_GRU_model = Model(inputs=inputs, outputs=outputs)
LSTM_GRU_model.compile(optimizer='adam', loss='binary_crossentropy',
                       metrics=['accuracy', mean_pred, fmeasure, precision, recall])
LSTM_GRU_model.summary()
2023-04-06 00:34:44.143232: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_2_grad/concat/split_2/split_dim' with dtype int32
[[{{node gradients/split_2_grad/concat/split_2/split_dim}}]]
2023-04-06 00:34:44.145182: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_grad/concat/split/split_dim' with dtype int32
[[{{node gradients/split_grad/concat/split/split_dim}}]]
2023-04-06 00:34:44.146974: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_1_grad/concat/split_1/split_dim' with dtype int32
[[{{node gradients/split_1_grad/concat/split_1/split_dim}}]]
2023-04-06 00:34:44.330351: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_2_grad/concat/split_2/split_dim' with dtype int32
[[{{node gradients/split_2_grad/concat/split_2/split_dim}}]]
2023-04-06 00:34:44.332203: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_grad/concat/split/split_dim' with dtype int32
[[{{node gradients/split_grad/concat/split/split_dim}}]]
2023-04-06 00:34:44.333795: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_1_grad/concat/split_1/split_dim' with dtype int32
[[{{node gradients/split_1_grad/concat/split_1/split_dim}}]]
Model: "model_5" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== Total params: 25,688,591 Trainable params: 25,688,591 Non-trainable params: 0 __________________________________________________________________________________________________
##We use cross validation to split arrays or matrices of train data into random train and validation subsets
# Hold out 10% of the training data for validation, then train on the
# remaining 90% ONLY. The original passed the FULL X_train/y_train to fit()
# while also validating on (X_val, y_val) — but those validation rows were
# drawn from that same training set, so the model trained on its validation
# data and the reported validation metrics were optimistically biased.
X_tra, X_val, y_tra, y_val = train_test_split(X_train, y_train, train_size=0.9, random_state=233)
LSTM_GRU_model_fit = LSTM_GRU_model.fit(
    X_tra, y_tra,
    batch_size=batch_size2,
    epochs=num_epochs,
    validation_data=(X_val, y_val),
    callbacks=[early],
)
Epoch 1/25
2023-04-06 00:34:44.851044: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_2_grad/concat/split_2/split_dim' with dtype int32
[[{{node gradients/split_2_grad/concat/split_2/split_dim}}]]
2023-04-06 00:34:44.853008: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_grad/concat/split/split_dim' with dtype int32
[[{{node gradients/split_grad/concat/split/split_dim}}]]
2023-04-06 00:34:44.855045: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_1_grad/concat/split_1/split_dim' with dtype int32
[[{{node gradients/split_1_grad/concat/split_1/split_dim}}]]
2023-04-06 00:34:45.031704: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_2_grad/concat/split_2/split_dim' with dtype int32
[[{{node gradients/split_2_grad/concat/split_2/split_dim}}]]
2023-04-06 00:34:45.033339: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_grad/concat/split/split_dim' with dtype int32
[[{{node gradients/split_grad/concat/split/split_dim}}]]
2023-04-06 00:34:45.035040: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_1_grad/concat/split_1/split_dim' with dtype int32
[[{{node gradients/split_1_grad/concat/split_1/split_dim}}]]
2023-04-06 00:34:46.005089: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_2_grad/concat/split_2/split_dim' with dtype int32
[[{{node gradients/split_2_grad/concat/split_2/split_dim}}]]
2023-04-06 00:34:46.007204: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_grad/concat/split/split_dim' with dtype int32
[[{{node gradients/split_grad/concat/split/split_dim}}]]
2023-04-06 00:34:46.009070: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_1_grad/concat/split_1/split_dim' with dtype int32
[[{{node gradients/split_1_grad/concat/split_1/split_dim}}]]
2023-04-06 00:34:46.198698: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_2_grad/concat/split_2/split_dim' with dtype int32
[[{{node gradients/split_2_grad/concat/split_2/split_dim}}]]
2023-04-06 00:34:46.200581: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_grad/concat/split/split_dim' with dtype int32
[[{{node gradients/split_grad/concat/split/split_dim}}]]
2023-04-06 00:34:46.202583: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_1_grad/concat/split_1/split_dim' with dtype int32
[[{{node gradients/split_1_grad/concat/split_1/split_dim}}]]
560/560 [==============================] - ETA: 0s - loss: 0.1903 - accuracy: 0.5814 - mean_pred: 0.0938 - fmeasure: 0.4571 - precision: 0.6856 - recall: 0.3568
2023-04-06 00:48:53.886171: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_2_grad/concat/split_2/split_dim' with dtype int32
[[{{node gradients/split_2_grad/concat/split_2/split_dim}}]]
2023-04-06 00:48:53.888145: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_grad/concat/split/split_dim' with dtype int32
[[{{node gradients/split_grad/concat/split/split_dim}}]]
2023-04-06 00:48:53.889714: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_1_grad/concat/split_1/split_dim' with dtype int32
[[{{node gradients/split_1_grad/concat/split_1/split_dim}}]]
2023-04-06 00:48:54.065088: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_2_grad/concat/split_2/split_dim' with dtype int32
[[{{node gradients/split_2_grad/concat/split_2/split_dim}}]]
2023-04-06 00:48:54.067141: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_grad/concat/split/split_dim' with dtype int32
[[{{node gradients/split_grad/concat/split/split_dim}}]]
2023-04-06 00:48:54.069018: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'gradients/split_1_grad/concat/split_1/split_dim' with dtype int32
[[{{node gradients/split_1_grad/concat/split_1/split_dim}}]]
560/560 [==============================] - 869s 2s/step - loss: 0.1903 - accuracy: 0.5814 - mean_pred: 0.0938 - fmeasure: 0.4571 - precision: 0.6856 - recall: 0.3568 - val_loss: 0.1571 - val_accuracy: 0.6626 - val_mean_pred: 0.0807 - val_fmeasure: 0.5860 - val_precision: 0.7702 - val_recall: 0.4733 Epoch 2/25 560/560 [==============================] - 870s 2s/step - loss: 0.1528 - accuracy: 0.6684 - mean_pred: 0.0811 - fmeasure: 0.5972 - precision: 0.7683 - recall: 0.4892 - val_loss: 0.1434 - val_accuracy: 0.6938 - val_mean_pred: 0.0799 - val_fmeasure: 0.6310 - val_precision: 0.7942 - val_recall: 0.5239 Epoch 3/25 560/560 [==============================] - 866s 2s/step - loss: 0.1389 - accuracy: 0.7024 - mean_pred: 0.0810 - fmeasure: 0.6427 - precision: 0.7927 - recall: 0.5410 - val_loss: 0.1274 - val_accuracy: 0.7345 - val_mean_pred: 0.0802 - val_fmeasure: 0.6737 - val_precision: 0.8380 - val_recall: 0.5638 Epoch 4/25 560/560 [==============================] - 865s 2s/step - loss: 0.1270 - accuracy: 0.7299 - mean_pred: 0.0810 - fmeasure: 0.6791 - precision: 0.8129 - recall: 0.5835 - val_loss: 0.1161 - val_accuracy: 0.7598 - val_mean_pred: 0.0816 - val_fmeasure: 0.7076 - val_precision: 0.8554 - val_recall: 0.6038 Epoch 5/25 560/560 [==============================] - 867s 2s/step - loss: 0.1168 - accuracy: 0.7502 - mean_pred: 0.0810 - fmeasure: 0.7069 - precision: 0.8285 - recall: 0.6169 - val_loss: 0.1058 - val_accuracy: 0.7761 - val_mean_pred: 0.0820 - val_fmeasure: 0.7378 - val_precision: 0.8676 - val_recall: 0.6422 Epoch 6/25 560/560 [==============================] - 865s 2s/step - loss: 0.1075 - accuracy: 0.7693 - mean_pred: 0.0809 - fmeasure: 0.7339 - precision: 0.8444 - recall: 0.6494 - val_loss: 0.0959 - val_accuracy: 0.8016 - val_mean_pred: 0.0803 - val_fmeasure: 0.7657 - val_precision: 0.8854 - val_recall: 0.6749 Epoch 7/25 560/560 [==============================] - 946s 2s/step - loss: 0.0987 - accuracy: 0.7873 - mean_pred: 0.0809 - fmeasure: 0.7582 - 
precision: 0.8586 - recall: 0.6792 - val_loss: 0.0875 - val_accuracy: 0.8223 - val_mean_pred: 0.0807 - val_fmeasure: 0.7904 - val_precision: 0.8927 - val_recall: 0.7095 Epoch 8/25 560/560 [==============================] - 1075s 2s/step - loss: 0.0908 - accuracy: 0.8024 - mean_pred: 0.0809 - fmeasure: 0.7798 - precision: 0.8714 - recall: 0.7059 - val_loss: 0.0797 - val_accuracy: 0.8270 - val_mean_pred: 0.0814 - val_fmeasure: 0.8119 - val_precision: 0.9022 - val_recall: 0.7384 Epoch 9/25 560/560 [==============================] - 1016s 2s/step - loss: 0.0833 - accuracy: 0.8167 - mean_pred: 0.0809 - fmeasure: 0.7998 - precision: 0.8835 - recall: 0.7310 - val_loss: 0.0722 - val_accuracy: 0.8422 - val_mean_pred: 0.0821 - val_fmeasure: 0.8339 - val_precision: 0.9103 - val_recall: 0.7697 Epoch 10/25 560/560 [==============================] - 973s 2s/step - loss: 0.0766 - accuracy: 0.8287 - mean_pred: 0.0809 - fmeasure: 0.8179 - precision: 0.8929 - recall: 0.7548 - val_loss: 0.0661 - val_accuracy: 0.8525 - val_mean_pred: 0.0824 - val_fmeasure: 0.8513 - val_precision: 0.9197 - val_recall: 0.7926 Epoch 11/25 560/560 [==============================] - 935s 2s/step - loss: 0.0700 - accuracy: 0.8396 - mean_pred: 0.0809 - fmeasure: 0.8355 - precision: 0.9040 - recall: 0.7769 - val_loss: 0.0596 - val_accuracy: 0.8585 - val_mean_pred: 0.0811 - val_fmeasure: 0.8664 - val_precision: 0.9354 - val_recall: 0.8073 Epoch 12/25 560/560 [==============================] - 923s 2s/step - loss: 0.0640 - accuracy: 0.8491 - mean_pred: 0.0809 - fmeasure: 0.8512 - precision: 0.9130 - recall: 0.7976 - val_loss: 0.0541 - val_accuracy: 0.8629 - val_mean_pred: 0.0828 - val_fmeasure: 0.8817 - val_precision: 0.9347 - val_recall: 0.8347 Epoch 13/25 560/560 [==============================] - 926s 2s/step - loss: 0.0583 - accuracy: 0.8589 - mean_pred: 0.0809 - fmeasure: 0.8656 - precision: 0.9204 - recall: 0.8173 - val_loss: 0.0489 - val_accuracy: 0.8717 - val_mean_pred: 0.0833 - val_fmeasure: 0.8962 - 
val_precision: 0.9399 - val_recall: 0.8566 Epoch 14/25 560/560 [==============================] - 879s 2s/step - loss: 0.0535 - accuracy: 0.8651 - mean_pred: 0.0809 - fmeasure: 0.8785 - precision: 0.9277 - recall: 0.8345 - val_loss: 0.0455 - val_accuracy: 0.8746 - val_mean_pred: 0.0817 - val_fmeasure: 0.9028 - val_precision: 0.9463 - val_recall: 0.8632 Epoch 15/25 560/560 [==============================] - 856s 2s/step - loss: 0.0492 - accuracy: 0.8703 - mean_pred: 0.0809 - fmeasure: 0.8890 - precision: 0.9334 - recall: 0.8490 - val_loss: 0.0425 - val_accuracy: 0.8753 - val_mean_pred: 0.0829 - val_fmeasure: 0.9096 - val_precision: 0.9441 - val_recall: 0.8777 Epoch 16/25 560/560 [==============================] - 854s 2s/step - loss: 0.0449 - accuracy: 0.8758 - mean_pred: 0.0808 - fmeasure: 0.8994 - precision: 0.9398 - recall: 0.8626 - val_loss: 0.0380 - val_accuracy: 0.8824 - val_mean_pred: 0.0831 - val_fmeasure: 0.9225 - val_precision: 0.9520 - val_recall: 0.8950 Epoch 17/25 560/560 [==============================] - 857s 2s/step - loss: 0.0413 - accuracy: 0.8792 - mean_pred: 0.0809 - fmeasure: 0.9085 - precision: 0.9446 - recall: 0.8752 - val_loss: 0.0341 - val_accuracy: 0.8885 - val_mean_pred: 0.0811 - val_fmeasure: 0.9289 - val_precision: 0.9625 - val_recall: 0.8977 Epoch 18/25 560/560 [==============================] - 860s 2s/step - loss: 0.0376 - accuracy: 0.8841 - mean_pred: 0.0808 - fmeasure: 0.9172 - precision: 0.9497 - recall: 0.8872 - val_loss: 0.0319 - val_accuracy: 0.8822 - val_mean_pred: 0.0817 - val_fmeasure: 0.9353 - val_precision: 0.9618 - val_recall: 0.9103 Epoch 19/25 560/560 [==============================] - 859s 2s/step - loss: 0.0343 - accuracy: 0.8894 - mean_pred: 0.0808 - fmeasure: 0.9257 - precision: 0.9548 - recall: 0.8986 - val_loss: 0.0283 - val_accuracy: 0.8922 - val_mean_pred: 0.0822 - val_fmeasure: 0.9431 - val_precision: 0.9673 - val_recall: 0.9202 Epoch 20/25 560/560 [==============================] - 856s 2s/step - loss: 0.0324 - 
accuracy: 0.8897 - mean_pred: 0.0808 - fmeasure: 0.9299 - precision: 0.9567 - recall: 0.9049 - val_loss: 0.0271 - val_accuracy: 0.8886 - val_mean_pred: 0.0813 - val_fmeasure: 0.9445 - val_precision: 0.9681 - val_recall: 0.9221 Epoch 21/25 560/560 [==============================] - 856s 2s/step - loss: 0.0295 - accuracy: 0.8924 - mean_pred: 0.0808 - fmeasure: 0.9365 - precision: 0.9606 - recall: 0.9138 - val_loss: 0.0248 - val_accuracy: 0.8891 - val_mean_pred: 0.0830 - val_fmeasure: 0.9509 - val_precision: 0.9662 - val_recall: 0.9362 Epoch 22/25 560/560 [==============================] - 854s 2s/step - loss: 0.0272 - accuracy: 0.8928 - mean_pred: 0.0808 - fmeasure: 0.9419 - precision: 0.9637 - recall: 0.9212 - val_loss: 0.0232 - val_accuracy: 0.8976 - val_mean_pred: 0.0809 - val_fmeasure: 0.9527 - val_precision: 0.9737 - val_recall: 0.9328 Epoch 23/25 560/560 [==============================] - 854s 2s/step - loss: 0.0253 - accuracy: 0.8970 - mean_pred: 0.0808 - fmeasure: 0.9461 - precision: 0.9660 - recall: 0.9272 - val_loss: 0.0213 - val_accuracy: 0.8962 - val_mean_pred: 0.0818 - val_fmeasure: 0.9570 - val_precision: 0.9714 - val_recall: 0.9432 Epoch 24/25 560/560 [==============================] - 854s 2s/step - loss: 0.0231 - accuracy: 0.8972 - mean_pred: 0.0808 - fmeasure: 0.9516 - precision: 0.9695 - recall: 0.9344 - val_loss: 0.0192 - val_accuracy: 0.8910 - val_mean_pred: 0.0823 - val_fmeasure: 0.9632 - val_precision: 0.9768 - val_recall: 0.9501 Epoch 25/25 560/560 [==============================] - 857s 2s/step - loss: 0.0215 - accuracy: 0.8998 - mean_pred: 0.0808 - fmeasure: 0.9549 - precision: 0.9712 - recall: 0.9394 - val_loss: 0.0189 - val_accuracy: 0.8963 - val_mean_pred: 0.0815 - val_fmeasure: 0.9624 - val_precision: 0.9752 - val_recall: 0.9500
from chart_studio import plotly
import plotly.offline as py
import plotly.graph_objs as go
#Computing the highest of the evaluation metrics (per model)
#For each metric, report the best (maximum) value reached over all epochs.
#NOTE(review): np.max is also applied to 'loss', where the best value is the
#*minimum* — confirm whether np.min was intended for the loss column.
metric_keys = ['loss', 'accuracy', 'mean_pred', 'fmeasure', 'precision', 'recall']
histories = [CNN_LSTM_model_fit.history, LSTM_GRU_model_fit.history]
#One column per metric, one rounded value per model (order matches the header).
max_columns = [[round(np.max(h[k]), 3) for h in histories] for k in metric_keys]
trace = go.Table(
    header=dict(values=['Model', 'Loss', 'Accuracy', 'mean_pred', 'F-Measure', 'Precision', 'Recall'],
                line=dict(color='#7D7F80'),
                fill=dict(color='#a1c3d1'),
                align=['left'] * 5),
    cells=dict(values=[['CNN-LSTM', 'LSTM-GRU']] + max_columns,
               line=dict(color='#7D7F80'),
               fill=dict(color='#EDFAFF'),
               align=['left'] * 5))
layout = dict(width=800, height=400)
data = [trace]
fig = dict(data=data, layout=layout)
#Bug fix: plot the assembled figure (trace + layout) instead of the bare trace
#list — previously `fig` was built but never used, so width/height were ignored.
py.iplot(fig, filename = 'multi-label_with the max of the evaluation matrics (per model) _table')
#Computing the mean of the evaluation metrics (per model)
#For each metric, report the value averaged over all training epochs.
mean_metric_keys = ['loss', 'accuracy', 'mean_pred', 'fmeasure', 'precision', 'recall']
mean_histories = [CNN_LSTM_model_fit.history, LSTM_GRU_model_fit.history]
#One column per metric, one rounded value per model (order matches the header).
mean_columns = [[round(np.mean(h[k]), 3) for h in mean_histories] for k in mean_metric_keys]
trace = go.Table(
    header=dict(values=['Model', 'Loss', 'Accuracy', 'mean_pred', 'F-Measure', 'Precision', 'Recall'],
                line=dict(color='#7D7F80'),
                fill=dict(color='#a1c3d1'),
                align=['left'] * 5),
    cells=dict(values=[['CNN-LSTM', 'LSTM-GRU']] + mean_columns,
               line=dict(color='#7D7F80'),
               fill=dict(color='#EDFAFF'),
               align=['left'] * 5))
layout = dict(width=800, height=400)
data = [trace]
fig = dict(data=data, layout=layout)
#Bug fix: plot the assembled figure (trace + layout) instead of the bare trace
#list — previously `fig` was built but never used, so width/height were ignored.
py.iplot(fig, filename = 'multi-label_with the mean of the evaluation matrics (per model) _table')